Part 1

Priors
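
The prior hyperparameters referenced below (kappa.prior1, theta.prior1, kappa.prior2, theta.prior2) and the sample sizes N1 and N2 are defined outside this excerpt. A minimal sketch of the kind of setup assumed, with placeholder values rather than the originals:

  # Shape (kappa) and scale (theta) of two Gamma priors on lambda
  # NOTE: illustrative placeholder values, not the originals
  kappa.prior1 <- 20;  theta.prior1 <- 0.05   # highly informative
  kappa.prior2 <- 1;   theta.prior2 <- 1      # less informative
  N1 <- nrow(lemurs1)  # sample size, dataset 1
  N2 <- nrow(lemurs2)  # sample size, dataset 2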

Sample Posterior Distributions of the Mean Intensity Parameter (Gibbs Sampling)
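
For a Poisson likelihood with a Gamma(shape = kappa, scale = theta) prior on the mean intensity lambda, the posterior is again a Gamma distribution, which is exactly what the draws below exploit:

$$
\lambda \mid y \sim \mathrm{Gamma}\!\left(\kappa + \sum_{i=1}^{N} y_i,\; \frac{\theta}{N\theta + 1}\right),
$$

where the second argument is the scale.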

# Gibbs sampler (with a single parameter this reduces to direct Monte
# Carlo draws from the conjugate Gamma posterior)
# Dataset 1
  post.1 <- rgamma(10000, shape = kappa.prior1 + sum(lemurs1[,1]), scale = theta.prior1/(N1*theta.prior1 + 1))
  post.2 <- rgamma(10000, shape = kappa.prior2 + sum(lemurs1[,1]), scale = theta.prior2/(N1*theta.prior2 + 1))
# Dataset 2
  post.3 <- rgamma(10000, shape = kappa.prior1 + sum(lemurs2[,1]), scale = theta.prior1/(N2*theta.prior1 + 1))
  post.4 <- rgamma(10000, shape = kappa.prior2 + sum(lemurs2[,1]), scale = theta.prior2/(N2*theta.prior2 + 1))

Plot Posterior Distributions
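
The figure itself is not reproduced here; one way to draw the four posterior densities from the samples above (a sketch, not the original plotting code):

  plot(density(post.1), lwd = 3, xlim = c(0, 2), xlab = expression(lambda),
       main = "Posterior distributions of the mean intensity")
  lines(density(post.2), lwd = 3, lty = 2)
  lines(density(post.3), lwd = 3, col = "gray50")
  lines(density(post.4), lwd = 3, lty = 2, col = "gray50")
  legend("topright", legend = c("Prior 1, lemurs1", "Prior 2, lemurs1",
                                "Prior 1, lemurs2", "Prior 2, lemurs2"),
         lwd = 3, lty = c(1, 2, 1, 2),
         col = c("black", "black", "gray50", "gray50"))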

Posterior Means and 95% CIs
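
A sketch of how the table below can be assembled (the MLE row is the sample mean, sum(y)/N, for each dataset):

  post.ci <- function(draws) c(mean    = mean(draws),
                               low.ci  = unname(quantile(draws, 0.025)),
                               high.ci = unname(quantile(draws, 0.975)))
  rbind(MLE = rep(c(sum(lemurs1[,1])/N1, sum(lemurs2[,1])/N2), each = 2),
        sapply(list(post.1, post.2, post.3, post.4), post.ci))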

##         Prior1lemurs1 Prior2lemurs1 Prior1lemurs2 Prior2lemurs2
## MLE         0.5555556     0.5555556     0.4545455     0.4545455
## mean        0.9641204     0.7898904     0.7282838     0.5046764
## low.ci      0.7866410     0.4385703     0.6142298     0.3813659
## high.ci     1.1555563     1.2393226     0.8522200     0.6433541

The highly informative prior had a strong influence on the posterior distribution for the small-sample data set, and a somewhat weaker but still substantial influence for the large-sample data set. The less informative prior had a moderate effect on the small-sample data set and very little effect on the large-sample data set.

Part 2

Model Fitting with JAGS

# JAGS data lists (one for each dataset x prior combination)
  library(rjags)

  data1.prior1 <- list(
                      y = lemurs1[,1],
                      N = length(lemurs1[,1]),
                      prior.r = kappa.prior1,
                      prior.lambda = 1/theta.prior1   # Gamma rate = 1/scale
            )

  data1.prior2 <- list(
                      y = lemurs1[,1],
                      N = length(lemurs1[,1]),
                      prior.r = kappa.prior2,
                      prior.lambda = 1/theta.prior2
            )

  data2.prior1 <- list(
                      y = lemurs2[,1],
                      N = length(lemurs2[,1]),
                      prior.r = kappa.prior1,
                      prior.lambda = 1/theta.prior1
            )

  data2.prior2 <- list(
                      y = lemurs2[,1],
                      N = length(lemurs2[,1]),
                      prior.r = kappa.prior2,
                      prior.lambda = 1/theta.prior2
            )
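
The contents of model.jags.binomial.gamma.r are not shown here; consistent with the data lists above (JAGS's dgamma is parameterized by shape and rate, hence prior.lambda = 1/theta), the model file is presumably along these lines:

  model{
    # Poisson likelihood for the observed counts
    for(i in 1:N){
      y[i] ~ dpois(lambda)
    }
    # Conjugate Gamma(shape, rate) prior on the mean intensity
    lambda ~ dgamma(prior.r, prior.lambda)
  }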
  
  
#MCMC inputs  (same across priors and datasets)
  n.chains = 2
  n.adapt = 1000
  n.iter = 10000
  thin = 2
  burn = 5000

# Model Parameters to save values of (same across priors and datasets)
  parms <- c("lambda")  


# Set up the models (adaptation happens here, via n.adapt)
  jm1 <- jags.model(file = "model.jags.binomial.gamma.r", data = data1.prior1, n.chains = n.chains, n.adapt = n.adapt)
  jm2 <- jags.model(file = "model.jags.binomial.gamma.r", data = data1.prior2, n.chains = n.chains, n.adapt = n.adapt)
  jm3 <- jags.model(file = "model.jags.binomial.gamma.r", data = data2.prior1, n.chains = n.chains, n.adapt = n.adapt)
  jm4 <- jags.model(file = "model.jags.binomial.gamma.r", data = data2.prior2, n.chains = n.chains, n.adapt = n.adapt)

# Update the models with the burn-in (update() takes no n.adapt argument)
  update(jm1, n.iter = burn)
  update(jm2, n.iter = burn)
  update(jm3, n.iter = burn)
  update(jm4, n.iter = burn)

# Fit the models and collect posterior samples of lambda
  post1 <- coda.samples(jm1, variable.names = parms, n.iter = n.iter, thin = thin)
  post2 <- coda.samples(jm2, variable.names = parms, n.iter = n.iter, thin = thin)
  post3 <- coda.samples(jm3, variable.names = parms, n.iter = n.iter, thin = thin)
  post4 <- coda.samples(jm4, variable.names = parms, n.iter = n.iter, thin = thin)

Plots of posteriors for lambda
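
The plots themselves are not reproduced here; with coda's mcmc.list objects they can be generated directly, e.g.:

  plot(post1)  # traceplots and density of lambda, both chains overlaid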

JAGS yields the same inference as the direct conjugate-posterior (Gibbs) sampling in Part 1.
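
Before comparing summaries, chain convergence can be checked with coda, for example:

  gelman.diag(post1)    # potential scale reduction factor (should be near 1)
  effectiveSize(post1)  # effective sample size pooled across chains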

Posterior Means and 95% CIs

##         Prior1lemurs1 Prior2lemurs1 Prior1lemurs2 Prior2lemurs2
## MLE         0.5555556     0.5555556     0.4545455     0.4545455
## mean        0.9629752     0.7902618     0.7278967     0.5043869
## low.ci      0.7846136     0.4443246     0.6123263     0.3826316
## high.ci     1.1559555     1.2221920     0.8540962     0.6461998

Stan Model Implementation

Using the small sample data set

The model below uses rstanarm's default priors rather than the priors defined above.

library(rstanarm)
dat <- data.frame(y = lemurs1[,1])


post1 <- stan_glm(y ~ 1, data = dat,
                  family = poisson(link = "log"))

plot(post1)

summary(post1)
post1$coefficients

post1$fitted.values

# Intercept draws (chain 1), exponentiated back to the response scale
lambda.posterior = exp(post1$stanfit@sim$samples[[1]]$`alpha[1]`)
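
An equivalent way to collect the intercept draws pooled across all chains uses rstanarm's as.matrix method for stanreg objects:

lambda.posterior.all <- exp(as.matrix(post1, pars = "(Intercept)"))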

#plot the posterior distribution of the mean intensity
plot(density(lambda.posterior,adjust = 2),lwd=3,xlim=c(0,2),
     main="Small sample data and default priors")

#The default prior
post1$prior.info
## $prior
## NULL
## 
## $prior_intercept
## $prior_intercept$dist
## [1] "normal"
## 
## $prior_intercept$location
## [1] 0
## 
## $prior_intercept$scale
## [1] 2.5
## 
## $prior_intercept$adjusted_scale
## NULL
## 
## $prior_intercept$df
## NULL
# Plot the default prior; note it is placed on the intercept of the
# log link, i.e., on log(lambda), not on lambda itself
curve(dnorm(x, 0, 2.5), lwd = 3, xlim = c(-10, 10))
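
Because the intercept sits on the log link, a Normal(0, 2.5) prior on log(lambda) implies a lognormal prior on lambda itself, which can be drawn on the natural scale as:

curve(dlnorm(x, meanlog = 0, sdlog = 2.5), lwd = 3, xlim = c(0, 5),
      xlab = expression(lambda), ylab = "density")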